Testing the different components of the enveloppe

Motion Clouds: testing components of the envelope

In [46]:
# set up the default frequency grids: fx, fy are spatial, ft is temporal
import MotionClouds as mc
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
  Click me!
In [47]:
def show_mc(name):
    """Display the figures and embed the video of a Motion Cloud inline.

    Parameters
    ----------
    name : str
        Path prefix of the generated files; extensions are appended from
        ``mc.ext`` (images) and ``mc.vext`` (video).
    """
    import os
    from base64 import b64encode
    from IPython.display import display, Image, HTML
    # show the static figure and the cube view when they were generated
    if os.path.isfile(name + mc.ext):
        display(Image(filename=name + mc.ext))
    if os.path.isfile(name + '_cube' + mc.ext):
        display(Image(filename=name + '_cube' + mc.ext))
    # embed the video as base64 data; a context manager closes the file
    # handle (the original open(...).read() leaked it)
    with open(name + mc.vext, "rb") as f:
        video_encoded = b64encode(f.read())
    video_tag = '<video controls  autoplay="autoplay" loop="loop" width=50% src="data:video/x-m4v;base64,{0}">'.format(video_encoded)
    display(HTML(data=video_tag))
  Click me!

Testing the color

In [48]:
# "colored noise" envelope: a power-law spectrum with no preferred direction
name = 'color'
z = mc.envelope_color(fx, fy, ft)
# NOTE(review): despite its name, mc.anim_exist() is used here as a
# "needs (re)generating" guard — confirm its semantics in MotionClouds
if mc.anim_exist(mc.figpath + name): mc.figures(z, mc.figpath + name)
show_mc(mc.figpath + name)
  Click me!
In [48]:
  Click me!
In [48]:
  Click me!
In [48]:
  Click me!
In [49]:
# explore the spectral exponent alpha of the power-law envelope
for alpha in [0.0, 0.5, 1.0, 1.5, 2.0]:
    # resp. white (alpha=0), pink (1), red/brownian (2) noise
    # (see http://en.wikipedia.org/wiki/1/f_noise)
    name_ = mc.figpath + name + '-alpha-' + str(alpha).replace('.', '_')
    if mc.anim_exist(name_):
        z = mc.envelope_color(fx, fy, ft, alpha)
        mc.figures(z, name_)
    show_mc(name_)
  Click me!
In [50]:
# explore the time/space scaling parameter ft_0
for ft_0 in [0.125, 0.25, 0.5, 1., 2., 4.]:
    # encode the parameter in the file name ('.' is not filename-friendly)
    tag = str(ft_0).replace('.', '_')
    name_ = mc.figpath + name + '-ft_0-' + tag
    if mc.anim_exist(name_):
        z = mc.envelope_color(fx, fy, ft, ft_0=ft_0)
        mc.figures(z, name_)
    show_mc(name_)
  Click me!
In [51]:
# explore the contrast parameter of the (linear) rectification
for contrast in [0.1, 0.25, 0.5, 0.75, 1.0, 2.0]:
    tag = str(contrast).replace('.', '_')
    name_ = mc.figpath + name + '-contrast-' + tag
    if mc.anim_exist(name_):
        # synthesize one cloud, then rectify it at the requested contrast
        envelope = mc.envelope_color(fx, fy, ft)
        movie = mc.random_cloud(envelope)
        im = mc.rectif(movie, contrast)
        mc.anim_save(im, name_, display=False)
    show_mc(name_)
  Click me!
In [52]:
# same contrast exploration, but using the 'energy' rectification method
for contrast in [0.1, 0.25, 0.5, 0.75, 1.0, 2.0]:
    tag = str(contrast).replace('.', '_')
    name_ = mc.figpath + name + '-energy_contrast-' + tag
    if mc.anim_exist(name_):
        envelope = mc.envelope_color(fx, fy, ft)
        movie = mc.random_cloud(envelope)
        im = mc.rectif(movie, contrast, method='energy')
        mc.anim_save(im, name_, display=False)
    show_mc(name_)
  Click me!
In [53]:
# different random seeds give different, statistically similar clouds
for step in range(7):
    seed = 123456 + step
    name_ = mc.figpath + name + '-seed-' + str(seed)
    if mc.anim_exist(name_):
        movie = mc.random_cloud(mc.envelope_color(fx, fy, ft), seed=seed)
        mc.anim_save(mc.rectif(movie), name_, display=False)
    show_mc(name_)
  Click me!
In [54]:
# explore the (cubic) stimulus size: N_X = N_Y = N_frame = 2**size
# NOTE(review): this overwrites the module-level fx, fy, ft grids —
# later cells re-using them will see the last (2**6) size
for size in range(5, 7):
    N_X, N_Y, N_frame = 2**size, 2**size, 2**size
    fx, fy, ft = mc.get_grids(N_X, N_Y, N_frame)
    # rescale the temporal cut-off to the new grid proportions
    ft_0 = N_X/float(N_frame)
    name_ = mc.figpath + name + '-size-' + str(size).replace('.', '_')
    if mc.anim_exist(name_):
        z = mc.envelope_color(fx, fy, ft, ft_0=ft_0)
        mc.figures(z, name_)
    show_mc(name_)
  Click me!
In [55]:
# explore the temporal size N_frame alone (spatial size fixed at mc.N_X, mc.N_Y)
for size in range(5, 7):
    N_frame = 2**size
    fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, N_frame)
    # BUG FIX: use mc.N_X — the bare N_X leaked from the previous cell
    # (hidden kernel state) and broke Restart-&-Run-All
    ft_0 = mc.N_X/float(N_frame)
    name_ = mc.figpath + name + '-size_T-' + str(size).replace('.', '_')
    if mc.anim_exist(name_):
        z = mc.envelope_color(fx, fy, ft, ft_0=ft_0)
        mc.figures(z, name_, do_figs=False)
    show_mc(name_)
  Click me!
In [56]:
#name = 'colorfull'
#N = 256 #512
#fx, fy, ft = mc.get_grids(N, N, N)
#for seed in [123456 + step for step in range(1)]:
#    if mc.anim_exist(mc.figpath + name):
#        mc.anim_save(mc.rectif(mc.random_cloud(mc.envelope_color(fx, fy, ft), seed=seed)), mc.figpath + name, display=False)
#        mc.anim_save(mc.rectif(mc.random_cloud(mc.envelope_color(fx, fy, ft), seed=seed)), mc.figpath + name, display=False, vext='.mat')
  Click me!

Testing the speed

In []:
  Click me!

Testing competing Motion Clouds with psychopy

Analysis of a discrimination experiment

In the psychopy_competition.py script, we implemented an experiment to test whether one could discriminate the dominating motion in a sum of two motion clouds in opposite directions.

Herein, we analyse the data that was collected over different sessions and try to draw some conclusions.

The experiment

In [1]:
#%pycat psychopy_competing.py
  Click me!

Analysis, version 1

In a first version of the experiment, we only stored the results in a numpy file:

In [2]:
!ls  data/competing_v1_*npy
  Click me!
data/competing_v1_bruno_Dec_14_1210.npy  data/competing_v1_lup_Dec_12_1003.npy	data/competing_v1_lup_Dec_12_1013.npy  data/competing_v1_lup_Dec_14_1201.npy  data/competing_v1_meduz_Dec_14_1204.npy

In [3]:
# overview: raw responses (perceived direction vs contrast), pooled sessions
import glob
fig = plt.figure()
ax = fig.add_subplot(111)
for fn in glob.glob('data/competing_v1_*npy'):
    # results[0, :] = perceived direction (+1/-1), results[1, :] = contrast
    results = np.load(fn)
    print 'experiment ', fn, ', # trials=', results.shape[1]
    ax.scatter(results[1, :], results[0, :])
_ = ax.axis([0., 1., -1.1, 1.1])
_ = ax.set_xlabel('contrast')
_ = ax.set_ylabel('perceived direction')
  Click me!
experiment  data/competing_v1_bruno_Dec_14_1210.npy , # trials= 50
experiment  data/competing_v1_lup_Dec_12_1003.npy , # trials= 50
experiment  data/competing_v1_lup_Dec_12_1013.npy , # trials= 50
experiment  data/competing_v1_lup_Dec_14_1201.npy , # trials= 10
experiment  data/competing_v1_meduz_Dec_14_1204.npy , # trials= 50

In [4]:
# fold both stimulus conditions into a (contrast, correct) dataset `data`
# (used by the following cells) and plot both response mappings side by side:
# left panel = "detected lower", right panel = "detected upper"
alpha = .3  # scatter transparency (many overlapping points)
fig = plt.figure(figsize=(12,5))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
data = []
for fn in glob.glob('data/competing_v1_*npy'):
    results = np.load(fn)
    # data_[0, :] will hold contrast folded to [.5, 1], data_[1, :] correctness
    data_ = np.empty(results.shape)
    # lower stronger, detected lower = CORRECT is 1
    ax1.scatter(results[1, results[1, :]<.5], 
                1.*(results[0, results[1, :]<.5]==-1), 
                c='r', alpha=alpha)
    # copying data: contrast (from .5 to 1), correct 
    data_[0, results[1, :]<.5] = 1. - results[1, results[1, :]<.5]
    data_[1, results[1, :]<.5] = 1.*(results[0, results[1, :]<.5]==-1)
    # upper stronger, detected lower = INCORRECT is 1
    ax1.scatter(results[1, results[1, :]>.5], 
                1.*(results[0, results[1, :]>.5]==-1), 
                c='b', alpha=alpha)
    # lower stronger, detected upper = INCORRECT is 1
    ax2.scatter(results[1, results[1, :]<.5], 
                1.*(results[0, results[1, :]<.5]==1), 
                c='b', alpha=alpha, marker='x')
    # upper stronger, detected upper = CORRECT is 1
    ax2.scatter(results[1, results[1, :]>.5], 
                1.*(results[0, results[1, :]>.5]==1), 
                c='r', alpha=alpha, marker='x')
    # copying data: contrast (from .5 to 1), correct 
    # NOTE(review): trials at contrast exactly .5 are excluded by the strict
    # `<`/`>` scatter masks but included here via `>=` — confirm intended
    data_[0, results[1, :]>=.5] = results[1, results[1, :]>=.5]
    data_[1, results[1, :]>=.5] = 1.*(results[0, results[1, :]>=.5]==1)
    data.append(data_)

for ax in [ax1, ax2]:
    ax.axis([0., 1., -.1, 1.1])
    _ = ax.set_xlabel('contrast')
_ = ax1.set_ylabel('detected lower')
_ = ax1.legend(['lower', 'upper'], loc='right')
_ = ax2.set_ylabel('detected upper')
_ = ax2.legend(['lower', 'upper'], loc='right')
  Click me!

(note the subplots are equivalent up to a flip)

One could not fit psychometric curves for this 2AFC. See for instance https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3110332/

In [30]:
# psychometric curve: fraction of correct responses per contrast bin
n_hist = 10
C_bins = np.linspace(0.5, 1., n_hist)
fig = plt.figure(figsize=(12,5))
ax = fig.add_subplot(111)
for data_ in data:
    # data_[0, :] = contrast (folded to [.5, 1]), data_[1, :] = correct (0/1)
    hist_correct, bins = np.histogram(data_[0, data_[1, : ]==1], bins=C_bins, normed=False)
    hist_total, bins = np.histogram(data_[0, : ], bins=C_bins, normed=False)
    # NOTE(review): a bin with hist_total == 0 yields a divide-by-zero (NaN) bar
    plt.bar(bins[:-1] + (bins[1]-bins[0])/2, (1.*hist_correct)/(1.*hist_total), width=.95*(bins[1]-bins[0]), alpha=.1)
_ = ax.axis([0.5, 1., 0., 1.1])
_ = ax.set_xlabel('contrast')
  Click me!

let's explore another way:

In [31]:
import pypsignifit as psi
  Click me!

data : an array or a list of lists containing stimulus intensities in the first column, number of correct responses (nAFC) or number of YES- responses in the second column, and number of trials in the third column. Each row should correspond to one experimental block. In addition, the sequence of the rows is taken as the sequence of data acquisition. Alternatively, the relative frequencies of correct responses resp. YES responses can be given.

In [32]:
# build the (intensity, fraction-correct, n-trials) data expected by psignifit
stimulus_intensities, percent_correct = np.array([]), np.array([])
for data_ in data:
    #print data_.shape
    #stimulus_intensities = np.hstack((stimulus_intensities, data_[0, : ]))
    #percent_correct = np.hstack((percent_correct, data_[1, : ]))
    #print data_[0, : ].min(), data_[0, : ].max(), data_[1, : ].mean()
    # bin each session as in the histogram cell: bin centers as intensities,
    # per-bin fraction correct as the response
    hist_correct, bins = np.histogram(data_[0, data_[1, : ]==1], bins=C_bins, normed=False)
    hist_total, bins = np.histogram(data_[0, : ], bins=C_bins, normed=False)
    stimulus_intensities = np.hstack((stimulus_intensities, bins[:-1] + (bins[1]-bins[0])/2))
    percent_correct = np.hstack((percent_correct, (1.*hist_correct)/(1.*hist_total)))

num_observations = len(stimulus_intensities)
print 'num_observations: ', num_observations
# NOTE(review): psignifit documents one *row* per block (intensity, response,
# n-trials); here blocks are stacked as columns with a constant n=2, and empty
# bins contribute NaN fractions — confirm pypsignifit accepts this layout
psidata = np.vstack((stimulus_intensities, percent_correct, np.array([2]*num_observations)))
  Click me!
num_observations:  45

BootstrapInference

In [34]:
# fit a psychometric function by (bootstrap) inference
# see http://psignifit.sourceforge.net/DESIGN.html
nafc = 2  # two-alternative forced choice
# priors on (threshold, slope, lapse rate); lapse constrained by Beta(2,20)
constraints = ( 'unconstrained', 'unconstrained', 'Beta(2,20)')
B = psi.BootstrapInference ( psidata, priors=constraints, nafc=nafc )
# (threshold, slope, lapse rate)
print B.estimate
  Click me!
[ 0.49862663  0.03601899  0.04546066]

How well do these parameters describe the data? The deviance is a measure that describes the goodness of fit for a model, based on the sum of the squares error metric:

In [35]:
print B.deviance, B.getThres()#, B.getCI(1)
  Click me!
0.186105675197 0.498626633193

In [36]:
_ = psi.psigniplot.plotPMF(B, xlabel_text='contrast', showaxes=True)
  Click me!
In [37]:
B.sample()
  Click me!
In [38]:
psi.GoodnessOfFit(B)
  Click me!
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-38-730472e77ec6> in <module>()
----> 1 psi.GoodnessOfFit(B)

/usr/local/lib/python2.7/site-packages/pypsignifit/psigniplot.pyc in GoodnessOfFit(InferenceObject, warn)
    575     if infer in ["BayesInference","ASIRInference"]:
    576         InferenceObject.drawposteriorexamples ( ax=ax_plot )
--> 577     plotThres ( InferenceObject, ax=ax_plot )
    578     plotPMF   ( InferenceObject, ax=ax_plot, showdesc=True )
    579     if infer in ["BayesInference","ASIRInference"]:

/usr/local/lib/python2.7/site-packages/pypsignifit/psigniplot.pyc in plotThres(InferenceObject, ax, color)
    480 
    481     for k,cut in enumerate(InferenceObject.cuts):
--> 482         c25,c975 = InferenceObject.getCI ( cut=k, conf=(.025,.975) )
    483         thres = InferenceObject.getThres ( cut )
    484         ylev = InferenceObject.evaluate ( [thres] )

/usr/local/lib/python2.7/site-packages/pypsignifit/psignidata.pyc in getCI(self, cut, conf, thres_or_slope)
    452                 vals.append(stats.norm.cdf( bias + ( stats.norm.ppf(pp) + bias ) / (1-acc*(stats.norm.ppf(pp) + bias )) ))
    453 
--> 454             return p.prctile ( self.__bthres[:,cut], 100*N.array(vals) )
    455         elif thres_or_slope[0] == "s":
    456             bias = self.__sl_bias[cut]

/usr/local/lib/python2.7/site-packages/matplotlib/mlab.pyc in prctile(x, p)
    953         frac[cond] += 1
    954 
--> 955     return _interpolate(values[ai],values[bi],frac)
    956 
    957 def prctile_rank(x, p):

IndexError: index -9223372036854775808 is out of bounds for size 2000
  • Top left: displays the given data points and the fitted psychometric function. Thresholds and confidence intervals are plotted at three levels (default: 25%, 50% and 75% ). Mind that the y-axis starts at 0.5 (the guess rate in a 2AFC task), therefore the 50% threshold is located at . signifies the deviance value.
  • Bottom left: histogram of bootstrapped deviance values (default = 2000 samples). The 95% confidence limits are indicated by red dotted lines and the actually observed deviance is indicated by the solid red line. If the observed deviance is outside the 95% confidence limits that indicates a bad fit and you will receive a warning message.
  • Top middle: deviance residuals are plotted as a function of the predicted correct response rate of the model (x-axis corresponds to y-axis in panel 1). This plot helps you to detect systematic deviations between the model and the data. The dotted line is the best linear fit that relates deviance residuals to the predicted correct response rate. Rpd gives the numerical value of that correlation. Note that the residuals are scaled to account for differences in the variability of a binomially distributed random variable (e.g. maximum variance at p=0.5).
  • Bottom middle: histogram of bootstrapped correlation coefficients for the correlation between residuals and performance level (same logic applies as in panel 2). Dotted lines denote 95% intervals of the sampled correlation coefficients, the solid line marks the observed correlation between model prediction and deviance residuals.
  • Top right: deviance residuals are plotted as a function of block index i.e. the sequence in which the data were acquired (WARNING: this graph can be properly interpreted only when stimulus intensities were fixed in separate blocks). If the observer was learning, the fitted linear correlation between residuals and block index should be positive.
  • Bottom right: histogram of bootstrapped correlation coefficients for the correlation between deviance residuals and block index (same logic applies as in panel 2 and 4).
In [13]:
priors = ( 'Gauss(0,5)', 'Gamma(1,3)', 'Beta(2,20)' )
  Click me!
In [14]:
mcmc = psi.BayesInference ( psidata, priors=priors, nafc=nafc )
  Click me!
Acceptance: 0.943333333333
Acceptance:
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:63: RuntimeWarning: divide by zero encountered in log
  G2 = 2 * sum ( T*log(T/fitted) )
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:63: RuntimeWarning: invalid value encountered in multiply
  G2 = 2 * sum ( T*log(T/fitted) )
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:63: RuntimeWarning: invalid value encountered in divide
  G2 = 2 * sum ( T*log(T/fitted) )
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:60: RuntimeWarning: invalid value encountered in double_scalars
  fitted[i,j,k] = T[i,j,:].sum() * T[:,j,k].sum() / T[:,j,:].sum()
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:94: RuntimeWarning: divide by zero encountered in log
  G2 = 2 * sum ( T*log(T/fitted) )

 0.541666666667
Acceptance: 0.525
Acceptance: 0.506015960712

/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:94: RuntimeWarning: invalid value encountered in multiply
  G2 = 2 * sum ( T*log(T/fitted) )

In [15]:
mcmc.estimate
  Click me!
Out[15]:
array([-1.63854678,  2.99021992,  0.0866207 ])
In [16]:
psi.ConvergenceMCMC ( mcmc )
  Click me!
/usr/local/lib/python2.7/site-packages/pypsignifit/psignidata.py:1330: RuntimeWarning: invalid value encountered in double_scalars
  B = n * sum( (psi_i-psi_mean)**2 ) / (m-1)

In [17]:
psi.GoodnessOfFit ( mcmc )
  Click me!
/usr/local/lib/python2.7/site-packages/matplotlib/lines.py:503: RuntimeWarning: invalid value encountered in greater_equal
  return np.alltrue(x[1:] - x[0:-1] >= 0)
/usr/local/lib/python2.7/site-packages/pypsignifit/psigniplot.py:267: RuntimeWarning: invalid value encountered in greater_equal
  pval = N.mean( (simdata-observed)>=0 )

In [18]:
 psi.ParameterPlot ( mcmc )
  Click me!
Out[18]:
[<matplotlib.axes.Axes at 0x10cd42610>,
 <matplotlib.axes.Axes at 0x10ce1aa90>,
 <matplotlib.axes.Axes at 0x10cef8810>]
In [19]:
psi.plotInfluential(mcmc)
  Click me!
In [19]:
  Click me!
In []:
  Click me!

Testing basic functions of Motion Clouds

Motion Clouds: raw principles

In [1]:
def show_video(filename):
    """Embed the video ``mc.figpath + filename + mc.vext`` inline as base64 HTML.

    Parameters
    ----------
    filename : str
        Basename of the video, without the ``mc.figpath`` prefix or the
        ``mc.vext`` extension.
    """
    from base64 import b64encode
    from IPython.display import display, HTML
    # read through a context manager so the file handle is closed
    # (the original open(...).read() leaked it)
    with open(mc.figpath + filename + mc.vext, "rb") as f:
        video_encoded = b64encode(f.read())
    video_tag = '<video controls  autoplay="autoplay" loop="loop" width=350px src="data:video/x-m4v;base64,{0}">'.format(video_encoded)
    display(HTML(data=video_tag))
  Click me!

Introduction

Motion Clouds are synthesized textures which aim at having similar characteristics as natural images but with controlled parameters. There are many ways to achieve these results and this notebook aims at showing that different procedures from different communities (neuroscience, modelling, computer vision, ...) may produce similar results.

Using mixtures of images

Using Fourier ("official Motion Clouds")

In [2]:
import MotionClouds as mc

fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
  Click me!

Using ARMA processes

In [3]:
import scipy.misc
# classic 512x512 test image, normalized to zero mean / unit variance
# NOTE(review): scipy.misc.lena() was removed in modern SciPy releases;
# use scipy.misc.face() or a local copy of the image on recent installs
lena = scipy.misc.lena() * 1.
lena -= lena.mean()
lena /= lena.std()
print lena.shape
  Click me!
(512, 512)

In [4]:
imshow(lena, cmap=cm.gray)
  Click me!
Out[4]:
<matplotlib.image.AxesImage at 0x112255fd0>
In [5]:
def noise(image=lena):
    """Return a scrambled copy of `image`: circularly shift it by a random
    offset along each of its two axes (content is preserved, position is
    randomized)."""
    shifted = np.roll(image, np.random.randint(image.shape[0]), axis=0)
    shifted = np.roll(shifted, np.random.randint(shifted.shape[1]), axis=1)
    return shifted
  Click me!
In [6]:
imshow(noise(), cmap=cm.gray)
  Click me!
Out[6]:
<matplotlib.image.AxesImage at 0x112693f50>
In [7]:
imshow(noise(), cmap=cm.gray)
  Click me!
Out[7]:
<matplotlib.image.AxesImage at 0x11426a150>

Now, we define the ARMA process as an averaging process with a certain time constant \(\tau=30.\) (in frames).

In [8]:
def ARMA(image, tau=30.):
    """One step of a leaky average: blend `image` with a fresh noise frame,
    with time constant `tau` (in frames)."""
    leak = 1/tau
    return (1 - leak)*image + leak*noise()
  Click me!

initializing

In [9]:
image = ARMA(lena)
imshow(image, cmap=cm.gray)
  Click me!
Out[9]:
<matplotlib.image.AxesImage at 0x11471bcd0>
In [10]:
# iterate the ARMA process 1000 steps to let the texture equilibrate
# NOTE(review): this cell is copy-pasted three times below — a single loop
# (or a function taking an iteration count) would be clearer
for _ in range(1000): image = ARMA(image)
imshow(image, cmap=cm.gray)
  Click me!
Out[10]:
<matplotlib.image.AxesImage at 0x114bd9e50>
In [11]:
for _ in range(1000): image = ARMA(image)
imshow(image, cmap=cm.gray)
  Click me!
Out[11]:
<matplotlib.image.AxesImage at 0x11528f390>
In [12]:
for _ in range(1000): image = ARMA(image)
imshow(image, cmap=cm.gray)
  Click me!
Out[12]:
<matplotlib.image.AxesImage at 0x1152c1890>
In [15]:
N_frame = 1024
z = np.zeros((lena.shape[0], lena.shape[1], N_frame))
z[:, :, 0] = image
for i_frame in range(1, N_frame): 
    z[:, :, i_frame] = ARMA(z[:, :, i_frame-1])
mc.anim_save(.5 + .5*z, filename='results/arma')
  Click me!
calculating   0% |                                             | ETA:  --:--:--
calculating   1% |                                              | ETA:  0:01:02
calculating   2% |                                              | ETA:  0:01:00
calculating   3% |#                                             | ETA:  0:00:58
calculating   4% |#                                             | ETA:  0:00:57
calculating   5% |##                                            | ETA:  0:00:57
calculating   6% |##                                            | ETA:  0:00:56
calculating   7% |###                                           | ETA:  0:00:55
calculating   8% |###                                           | ETA:  0:00:55
calculating   9% |####                                          | ETA:  0:00:54
calculating  10% |####                                          | ETA:  0:00:54
calculating  11% |#####                                         | ETA:  0:00:53
calculating  12% |#####                                         | ETA:  0:00:52
calculating  13% |######                                        | ETA:  0:00:51
calculating  15% |######                                        | ETA:  0:00:51
calculating  16% |#######                                       | ETA:  0:00:50
calculating  17% |#######                                       | ETA:  0:00:49
calculating  18% |########                                      | ETA:  0:00:49
calculating  19% |########                                      | ETA:  0:00:48
calculating  20% |#########                                     | ETA:  0:00:47
calculating  21% |#########                                     | ETA:  0:00:47
calculating  22% |##########                                    | ETA:  0:00:46
calculating  23% |##########                                    | ETA:  0:00:45
calculating  24% |###########                                   | ETA:  0:00:44
calculating  25% |###########                                   | ETA:  0:00:44
calculating  26% |############                                  | ETA:  0:00:43
calculating  27% |############                                  | ETA:  0:00:42
calculating  29% |#############                                 | ETA:  0:00:42
calculating  30% |#############                                 | ETA:  0:00:41
calculating  31% |##############                                | ETA:  0:00:41
calculating  32% |##############                                | ETA:  0:00:40
calculating  33% |###############                               | ETA:  0:00:39
calculating  34% |###############                               | ETA:  0:00:39
calculating  35% |################                              | ETA:  0:00:38
calculating  36% |################                              | ETA:  0:00:37
calculating  37% |#################                             | ETA:  0:00:37
calculating  38% |#################                             | ETA:  0:00:36
calculating  39% |##################                            | ETA:  0:00:35
calculating  40% |##################                            | ETA:  0:00:35
calculating  41% |###################                           | ETA:  0:00:34
calculating  42% |###################                           | ETA:  0:00:33
calculating  44% |####################                          | ETA:  0:00:33
calculating  45% |####################                          | ETA:  0:00:32
calculating  46% |#####################                         | ETA:  0:00:31
calculating  47% |#####################                         | ETA:  0:00:31
calculating  48% |######################                        | ETA:  0:00:30
calculating  49% |######################                        | ETA:  0:00:30
calculating  50% |#######################                       | ETA:  0:00:29
calculating  51% |#######################                       | ETA:  0:00:28
calculating  52% |########################                      | ETA:  0:00:28
calculating  53% |########################                      | ETA:  0:00:27
calculating  54% |#########################                     | ETA:  0:00:26
calculating  55% |#########################                     | ETA:  0:00:26
calculating  56% |##########################                    | ETA:  0:00:25
calculating  58% |##########################                    | ETA:  0:00:25
calculating  59% |###########################                   | ETA:  0:00:24
calculating  60% |###########################                   | ETA:  0:00:23
calculating  61% |############################                  | ETA:  0:00:23
calculating  62% |############################                  | ETA:  0:00:22
calculating  63% |#############################                 | ETA:  0:00:21
calculating  64% |#############################                 | ETA:  0:00:21
calculating  65% |##############################                | ETA:  0:00:20
calculating  66% |##############################                | ETA:  0:00:19
calculating  67% |###############################               | ETA:  0:00:19
calculating  68% |###############################               | ETA:  0:00:18
calculating  69% |################################              | ETA:  0:00:17
calculating  70% |################################              | ETA:  0:00:17
calculating  71% |#################################             | ETA:  0:00:16
calculating  73% |#################################             | ETA:  0:00:16
calculating  74% |##################################            | ETA:  0:00:15
calculating  75% |##################################            | ETA:  0:00:14
calculating  76% |###################################           | ETA:  0:00:14
calculating  77% |###################################           | ETA:  0:00:13
calculating  78% |####################################          | ETA:  0:00:12
calculating  79% |####################################          | ETA:  0:00:12
calculating  80% |#####################################         | ETA:  0:00:11
calculating  81% |#####################################         | ETA:  0:00:10
calculating  82% |######################################        | ETA:  0:00:10
calculating  83% |######################################        | ETA:  0:00:09
calculating  84% |#######################################       | ETA:  0:00:08
calculating  85% |#######################################       | ETA:  0:00:08
calculating  87% |########################################      | ETA:  0:00:07
calculating  88% |########################################      | ETA:  0:00:07
calculating  89% |#########################################     | ETA:  0:00:06
calculating  90% |#########################################     | ETA:  0:00:05
calculating  91% |##########################################    | ETA:  0:00:05
calculating  92% |##########################################    | ETA:  0:00:04
calculating  93% |##########################################    | ETA:  0:00:03
calculating  94% |###########################################   | ETA:  0:00:03
calculating  95% |###########################################   | ETA:  0:00:02
calculating  96% |############################################  | ETA:  0:00:01
calculating  97% |############################################  | ETA:  0:00:01
calculating  98% |############################################# | ETA:  0:00:00
calculating  99% |############################################# | ETA:  0:00:00
Saving sequence results/arma.mp4

calculating 100% |##############################################| Time: 0:00:59

In [16]:
show_video(filename='arma')
  Click me!
In [14]:
  Click me!

Testing discrimination between Motion Clouds with psychopy

Analysis of a discrimination experiment

In the psychopy_discrimination.py script, we implemented an experiment to test whether the overall shape of motion clouds could influence discrimination of speed.

Herein, we analyse the data that was collected over different sessions and try to draw some conclusions.

The experiment

In [6]:
#%pycat psychopy_discrimination.py
  Click me!

Analysis - version 1

In a first version of the experiment, we only stored the results in a numpy file.

In [7]:
!ls  data/discriminating_*
  Click me!
data/discriminating_john_Jan_23_1515.npy  data/discriminating_lup_Jan_23_1401.npy  data/discriminating_lup_Jan_24_0914.npy  data/discriminating_lup_Jan_24_0931.npy  data/discriminating_v2_lup_Feb_07_1409.pickle
data/discriminating_lup.pickle		  data/discriminating_lup_Jan_23_1735.npy  data/discriminating_lup_Jan_24_0919.npy  data/discriminating_lup_Jan_24_0937.npy  data/discriminating_v2_lup_Feb_07_1434.pickle

In [8]:
import glob
for fn in glob.glob('data/discriminating_*npy'):
    results = np.load(fn)
    print fn, results.shape
    print('analyzing results')
    print '# of trials :', numpy.abs(results).sum(axis=1)
    print 'average results: ', (results.sum(axis=1)/numpy.abs(results).sum(axis=1)*.5+.5)
  Click me!
data/discriminating_john_Jan_23_1515.npy (2, 50)
analyzing results
# of trials : [ 50.          24.50835188]
average results:  [ 0.48  1.  ]
data/discriminating_lup_Jan_23_1401.npy (2, 50)
analyzing results
# of trials : [ 50.          28.12570981]
average results:  [ 0.66  1.  ]
data/discriminating_lup_Jan_23_1735.npy (3, 50)
analyzing results
# of trials : [  9.  14.  13.]
average results:  [ 1.  1.  1.]
data/discriminating_lup_Jan_24_0914.npy (3, 50)
analyzing results
# of trials : [ 17.  21.  12.]
average results:  [ 0.64705882  0.85714286  1.        ]
data/discriminating_lup_Jan_24_0919.npy (3, 50)
analyzing results
# of trials : [ 10.  25.  15.]
average results:  [ 0.7         0.32        0.53333333]
data/discriminating_lup_Jan_24_0931.npy (7, 50)
analyzing results
# of trials : [  3.   4.   8.   8.   7.  12.   8.]
average results:  [ 0.66666667  1.          0.625       0.375       1.          0.16666667
  0.125     ]
data/discriminating_lup_Jan_24_0937.npy (7, 50)
analyzing results
# of trials : [  7.   5.   6.  12.  10.   2.   8.]
average results:  [ 0.85714286  0.8         0.83333333  0.41666667  0.2         1.          0.375     ]

In [9]:
%whos
  Click me!
Variable   Type       Data/Info
-------------------------------
fn         str        data/discriminating_lup_Jan_24_0937.npy
glob       module     <module 'glob' from '/usr<...>/lib/python2.7/glob.pyc'>
results    ndarray    7x50: 350 elems, type `float64`, 2800 bytes

Another solution is to use:

In [10]:
data= 1
np.savez('toto', data=data, results=results)
print np.load('toto.npz')['data']
  Click me!
1

or directly psychopy.misc:

In [11]:
from psychopy import misc
  Click me!
In [12]:
info = misc.fromFile('data/discriminating.pickle')
  Click me!
In [13]:
# re-save the legacy results together with the experiment parameters
alphas = [-1., -.5, 0., 0.5, 1., 1.5, 2.]
fileName = 'data/discriminating_' + info['observer'] + '.pickle'
info['alphas'] = alphas
# NOTE(review): `results` leaks from the glob loop above (last file only)
info['results'] = results
#numpy.savez(fileName, results=results, alphas=alphas)
misc.toFile(fileName, info)
  Click me!

Analysis - version 2

In the second version, we stored more data:

In [14]:
import glob
from psychopy import misc
fig = plt.figure()
ax = fig.add_subplot(111)
for fn in glob.glob('data/discriminating_v2_*pickle'):
    data = misc.fromFile(fn)
    print fn, results.shape
    print('analyzing results')
    alphas = np.array(data['alphas'])
    print ' alphas = ', alphas
    print '# of trials :', numpy.abs(data['results']).sum(axis=1)
    print 'average results: ', (data['results'].sum(axis=1)/numpy.abs(data['results']).sum(axis=1)*.5+.5)
    width = (alphas.max()-alphas.min())/len(alphas)
    ax.bar(data['alphas'] - width/2, (data['results'].sum(axis=1)/numpy.abs(data['results']).sum(axis=1)*.5+.5), width=width, alpha=.3 )
#    for i_trial in xrange(data['results'].shape[1]):
#        ax.scatter(data['alphas'][data['results']
ax.set_xlabel('alpha')
ax.set_ylabel('% correct')
  Click me!
data/discriminating_v2_lup_Feb_07_1409.pickle (7, 50)
analyzing results
 alphas =  [-1.  -0.5  0.   0.5  1.   1.5  2. ]
# of trials : [ 10.   4.   5.  11.   6.  11.   3.]
average results:  [ 0.7         1.          0.4         0.54545455  0.83333333  0.27272727
  1.        ]
data/discriminating_v2_lup_Feb_07_1434.pickle (7, 50)
analyzing results
 alphas =  [-1.  -0.5  0.   0.5  1.   1.5  2. ]
# of trials : [  4.   3.  10.  12.   8.   8.   5.]
average results:  [ 0.75        0.66666667  0.8         0.5         0.125       0.25        0.4       ]

Out[14]:
<matplotlib.text.Text at 0x10f75e850>
In [16]:
# TODO : correct for the number of trials / make global reading / make a new draw of alphas
# TODO : test f_0
  Click me!
In [15]:
  Click me!
Contents © 2014 Laurent Perrinet - Powered by Nikola Creative Commons License BY-NC-SA ¬